void disable_local_APIC(void)
{
- unsigned long value;
-
clear_local_APIC();
/*
* Disable APIC (implies clearing of registers
* for 82489DX!).
*/
- value = apic_read(APIC_SPIV);
- value &= ~APIC_SPIV_APIC_ENABLED;
- apic_write_around(APIC_SPIV, value);
+ apic_write_around(APIC_SPIV,
+ apic_read(APIC_SPIV) & ~APIC_SPIV_APIC_ENABLED);
if (enabled_via_apicbase) {
- unsigned int l, h;
- rdmsr(MSR_IA32_APICBASE, l, h);
- l &= ~MSR_IA32_APICBASE_ENABLE;
- wrmsr(MSR_IA32_APICBASE, l, h);
+ uint64_t msr_content;
+ rdmsrl(MSR_IA32_APICBASE, msr_content);
+ wrmsrl(MSR_IA32_APICBASE, msr_content & ~MSR_IA32_APICBASE_ENABLE);
}
}
int lapic_resume(void)
{
- unsigned int l, h;
+ uint64_t msr_content;
unsigned long flags;
int maxlvt;
*/
if ( !x2apic_enabled )
{
- rdmsr(MSR_IA32_APICBASE, l, h);
- l &= ~MSR_IA32_APICBASE_BASE;
- l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
- wrmsr(MSR_IA32_APICBASE, l, h);
+ rdmsrl(MSR_IA32_APICBASE, msr_content);
+ msr_content &= ~MSR_IA32_APICBASE_BASE;
+ wrmsrl(MSR_IA32_APICBASE,
+ msr_content | MSR_IA32_APICBASE_ENABLE | mp_lapic_addr);
}
else
enable_x2apic();
static int __init detect_init_APIC (void)
{
- u32 h, l, features;
+ uint64_t msr_content;
+ u32 features;
/* Disabled by kernel option? */
if (enable_local_apic < 0)
* software for Intel P6 or later and AMD K7
* (Model > 1) or later.
*/
- rdmsr(MSR_IA32_APICBASE, l, h);
- if (!(l & MSR_IA32_APICBASE_ENABLE)) {
+ rdmsrl(MSR_IA32_APICBASE, msr_content);
+ if (!(msr_content & MSR_IA32_APICBASE_ENABLE)) {
printk("Local APIC disabled by BIOS -- reenabling.\n");
- l &= ~MSR_IA32_APICBASE_BASE;
- l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
- wrmsr(MSR_IA32_APICBASE, l, h);
+ msr_content &= ~MSR_IA32_APICBASE_BASE;
+ msr_content |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
+ wrmsrl(MSR_IA32_APICBASE, msr_content);
enabled_via_apicbase = 1;
}
}
mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
/* The BIOS may have set up the APIC at some other address */
- rdmsr(MSR_IA32_APICBASE, l, h);
- if (l & MSR_IA32_APICBASE_ENABLE)
- mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
+ rdmsrl(MSR_IA32_APICBASE, msr_content);
+ if (msr_content & MSR_IA32_APICBASE_ENABLE)
+ mp_lapic_addr = msr_content & MSR_IA32_APICBASE_BASE;
if (nmi_watchdog != NMI_NONE)
nmi_watchdog = NMI_LOCAL_APIC;
void enable_x2apic(void)
{
- u32 lo, hi;
+ uint64_t msr_content;
if ( smp_processor_id() == 0 )
{
BUG_ON(!x2apic_enabled); /* APs only enable x2apic when BSP did so. */
}
- rdmsr(MSR_IA32_APICBASE, lo, hi);
- if ( !(lo & MSR_IA32_APICBASE_EXTD) )
+ rdmsrl(MSR_IA32_APICBASE, msr_content);
+ if ( !(msr_content & MSR_IA32_APICBASE_EXTD) )
{
- lo |= MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD;
- wrmsr(MSR_IA32_APICBASE, lo, 0);
+ msr_content |= MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD;
+ /* Preserve old wrmsr(msr, lo, 0) behaviour: upper half written as zero. */
+ msr_content = (uint32_t)msr_content;
+ wrmsrl(MSR_IA32_APICBASE, msr_content);
printk("x2APIC mode enabled.\n");
}
else
{
/* This can only be non-zero if selector is NULL. */
if ( nctxt->fs_base )
- wrmsr(MSR_FS_BASE,
- nctxt->fs_base,
- nctxt->fs_base>>32);
+ wrmsrl(MSR_FS_BASE, nctxt->fs_base);
/* Most kernels have non-zero GS base, so don't bother testing. */
/* (This is also a serialising instruction, avoiding AMD erratum #88.) */
- wrmsr(MSR_SHADOW_GS_BASE,
- nctxt->gs_base_kernel,
- nctxt->gs_base_kernel>>32);
+ wrmsrl(MSR_SHADOW_GS_BASE, nctxt->gs_base_kernel);
/* This can only be non-zero if selector is NULL. */
if ( nctxt->gs_base_user )
- wrmsr(MSR_GS_BASE,
- nctxt->gs_base_user,
- nctxt->gs_base_user>>32);
+ wrmsrl(MSR_GS_BASE, nctxt->gs_base_user);
/* If in kernel mode then switch the GS bases around. */
if ( (n->arch.flags & TF_kernel_mode) )
static int collect_cpu_info(int cpu, struct cpu_signature *csig)
{
struct cpuinfo_x86 *c = &cpu_data[cpu];
- uint32_t dummy;
memset(csig, 0, sizeof(*csig));
return -EINVAL;
}
- rdmsr(MSR_AMD_PATCHLEVEL, csig->rev, dummy);
+ rdmsrl(MSR_AMD_PATCHLEVEL, csig->rev);
printk(KERN_INFO "microcode: collect_cpu_info: patch_id=0x%x\n",
csig->rev);
{
unsigned long flags;
struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu);
- uint32_t rev, dummy;
+ uint32_t rev;
struct microcode_amd *mc_amd = uci->mc.mc_amd;
/* We should bind the task to the CPU */
wrmsrl(MSR_AMD_PATCHLOADER, (unsigned long)&mc_amd->hdr.data_code);
/* get patch id after patching */
- rdmsr(MSR_AMD_PATCHLEVEL, rev, dummy);
+ rdmsrl(MSR_AMD_PATCHLEVEL, rev);
spin_unlock_irqrestore(µcode_update_lock, flags);
static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
struct cpuinfo_x86 *c = &cpu_data[cpu_num];
- unsigned int val[2];
+ uint64_t msr_content;
BUG_ON(cpu_num != smp_processor_id());
if ( (c->x86_model >= 5) || (c->x86 > 6) )
{
/* get processor flags from MSR 0x17 */
- rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
- csig->pf = 1 << ((val[1] >> 18) & 7);
+ rdmsrl(MSR_IA32_PLATFORM_ID, msr_content);
+ /* Platform ID is bits 52:50 (was bits 20:18 of the high word). */
+ csig->pf = 1 << ((msr_content >> 50) & 7);
}
- wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+ wrmsrl(MSR_IA32_UCODE_REV, 0x0ULL);
/* see notes above for revision 1.07. Apparent chip bug */
sync_core();
/* get the current revision from MSR 0x8B */
- rdmsr(MSR_IA32_UCODE_REV, val[0], csig->rev);
+ rdmsrl(MSR_IA32_UCODE_REV, msr_content);
+ csig->rev = (uint32_t)(msr_content >> 32);
pr_debug("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n",
csig->sig, csig->pf, csig->rev);
static int apply_microcode(int cpu)
{
unsigned long flags;
+ uint64_t msr_content;
unsigned int val[2];
int cpu_num = raw_smp_processor_id();
struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu_num);
spin_lock_irqsave(µcode_update_lock, flags);
/* write microcode via MSR 0x79 */
- wrmsr(MSR_IA32_UCODE_WRITE,
- (unsigned long) uci->mc.mc_intel->bits,
- (unsigned long) uci->mc.mc_intel->bits >> 16 >> 16);
- wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+ wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)uci->mc.mc_intel->bits);
+ wrmsrl(MSR_IA32_UCODE_REV, 0x0ULL);
/* see notes above for revision 1.07. Apparent chip bug */
sync_core();
/* get the current revision from MSR 0x8B */
- rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
+ rdmsrl(MSR_IA32_UCODE_REV, msr_content);
+ val[1] = (uint32_t)(msr_content >> 32);
spin_unlock_irqrestore(µcode_update_lock, flags);
if ( val[1] != uci->mc.mc_intel->hdr.rev )
static int __pminit setup_p4_watchdog(void)
{
- unsigned int misc_enable, dummy;
+ uint64_t misc_enable;
- rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
+ rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
if (!(misc_enable & MSR_IA32_MISC_ENABLE_PERF_AVAIL))
return 0;
clear_msr_range(MSR_P4_BPU_CCCR0, 18);
clear_msr_range(MSR_P4_BPU_PERFCTR0, 18);
- wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
- wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
+ wrmsrl(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0);
+ wrmsrl(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE);
write_watchdog_counter("P4_IQ_COUNTER0");
apic_write(APIC_LVTPC, APIC_DM_NMI);
- wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
+ wrmsrl(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val);
return 1;
}
* - LVTPC is masked on interrupt and must be
* unmasked by the LVTPC handler.
*/
- wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
+ wrmsrl(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val);
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
else if ( nmi_perfctr_msr == MSR_P6_PERFCTR0 )
unsigned int i;
for (i = 0; i < nr_ctrs; ++i) {
- rdmsr(counters[i].addr,
- counters[i].saved.low,
- counters[i].saved.high);
+ rdmsrl(counters[i].addr, counters[i].value);
}
for (i = 0; i < nr_ctrls; ++i) {
- rdmsr(controls[i].addr,
- controls[i].saved.low,
- controls[i].saved.high);
+ rdmsrl(controls[i].addr, controls[i].value);
}
}
unsigned int i;
for (i = 0; i < nr_ctrls; ++i) {
- wrmsr(controls[i].addr,
- controls[i].saved.low,
- controls[i].saved.high);
+ wrmsrl(controls[i].addr, controls[i].value);
}
for (i = 0; i < nr_ctrs; ++i) {
- wrmsr(counters[i].addr,
- counters[i].saved.low,
- counters[i].saved.high);
+ wrmsrl(counters[i].addr, counters[i].value);
}
}
#ifndef OP_X86_MODEL_H
#define OP_X86_MODEL_H
-struct op_saved_msr {
- unsigned int high;
- unsigned int low;
-};
-
struct op_msr {
unsigned long addr;
- struct op_saved_msr saved;
+ uint64_t value;
};
struct op_msrs {